Introduction

The goal of study 1 was to…

Methods

library(tidyverse)
library(ggdist)
library(ggside)
library(easystats)
library(patchwork)
library(brms)

# Import the raw data of study 1 and set up the variable types: dates as
# Date, design variables as factors, participants ordered chronologically.
df <- read.csv("data/study1.csv")

df <- df |>
  mutate(
    Date = as.Date(Date),
    # Order participant factor levels by session date
    Participant = fct_reorder(Participant, Date),
    Screen_Refresh = as.character(Screen_Refresh),
    # Categorical design variables
    across(c(Illusion_Side, Block), as.factor),
    # Explicit ordering of education levels
    Education = fct_relevel(Education, "Master", "Bachelor", "High School", "Other")
  )

Exclusions

# plot(estimate_density(dfraw[dfraw$Participant == "60684f29dbfe1bb2059e5e27_rkqoy", "RT"]))

# Dear participant, thank you for participating in our study. Unfortunately, our system detected multiple issues in your data (such as implausibly short responses, and a random-like pattern of answers), which make the data unusable for us. We understand that you might have been in a hurry, or had some other issues, and kindly ask you to return your participation. We hope to open up more slots in the future, should you be interested in participating again. 

# Participants excluded after manual inspection of error rates and RT
# distributions (see checks below); each ID is annotated with the reason
# for exclusion and the corresponding Prolific submission status.
outliers <- c(
  # Half of the trials have very short RT
  # Prolific Status: REJECTED
  "60684f29dbfe1bb2059e5e27_rkqoy",
  # Block n2 with very short RTs
  # Prolific Status: RETURNED
  "5e860198a846e30497df5189_6e43s",
  # Error rate of 46.2% and short RTs
  # Prolific Status: RETURNED
  "615f319bb341cf2f306d858d_qsaq5",
  # Error rate of 46.2%
  # Prolific Status: RETURNED
  "614f36fd81c78b7a125c4262_6ax4g",
  # Error rate of 47.9%
  # Prolific Status: RETURN REQUESTED
  "61280140171ec546e87ed8cb_qdlgy",
  # Error rate of 42.1% and very large RT SD
  # Prolific Status: RETURN REQUESTED
  "5d398380b37ab1000111fac3_2nxxh"
)

We removed 6 participants upon inspection of the average error rate (when close to 50%, suggesting random answers) and/or when the reaction time distribution was implausibly fast.

For each block, we computed the error rate and, if more than 50%, we discarded the whole block (as it likely indicates that instructions got mixed up, for instance participants were selecting the smaller instead of the bigger circle).

Error Rate

# Per-participant performance summary (error rate, RT mean and SD),
# sorted so that the most error-prone participants appear first.
dfsub <- df |>
  group_by(Participant) |>
  summarize(
    # n = n(),
    Error = sum(Error) / n(),
    RT_Mean = mean(RT),
    RT_SD = sd(RT)  # trailing empty argument removed (fragile in dplyr dots)
  ) |>
  ungroup() |>
  arrange(desc(Error))

# Render the table, highlighting the excluded participants in red
knitr::kable(dfsub) |> 
  kableExtra::row_spec(which(dfsub$Participant %in% outliers), background = "#EF9A9A")
Participant Error RT_Mean RT_SD
61280140171ec546e87ed8cb_qdlgy 0.479 262 296
615f319bb341cf2f306d858d_qsaq5 0.465 263 372
614f36fd81c78b7a125c4262_6ax4g 0.462 630 611
5d398380b37ab1000111fac3_2nxxh 0.421 507 1679
5e860198a846e30497df5189_6e43s 0.402 492 725
6104696d120a50b0ec51aa1f_m56p5 0.300 723 579
61572ca3e91309ebe876a9db_8cqnp 0.287 659 333
5d9091ff391a60058a7711b5_dvz9e 0.269 578 172
5bb511c6689fc5000149c703_r4kjk 0.262 716 271
5ab308940527ba0001c15a9f_rnclh 0.262 588 206
6106b7157977b80c497314f8_d7ukm 0.260 718 1294
61088fcf393bde58359957b3_ol8jp 0.255 837 711
60684f29dbfe1bb2059e5e27_rkqoy 0.251 599 1326
611eb7284490ba01cddfbe98_om6zf 0.246 699 414
5bc84a5f0760100001b3b9de_4cxmv 0.246 560 197
60d129f2a122e84175a56425_z2w8h 0.243 693 232
5d7389f193a945001a3ea504_nhua6 0.238 1160 1660
60dae077e62179eb469e32a4_b4pte 0.227 748 243
5c6b0a27ffc824000191c7d8_5ajt1 0.225 780 427
5f2f0ac775b91d2e1865c1cc_xkcs9 0.219 785 836
5ff46a1a99e7cfb2994f7958_f2zg0 0.216 506 150
5f19559b9665f700090276c4_hsmss 0.215 738 375
5c8ab0f10de08f00016e43a1_pyvgt 0.213 1076 557
5ecd37ee75736a068808fa6c_7fgo5 0.210 870 489
6166a03f5063db088c458b73_m7w8f 0.207 804 378
606cd013f538ed55e02069b5_vr3v7 0.206 652 367
5f480e566265722a9b2b2660_0bola 0.205 511 147
605b60879326739b05897042_bpsyp 0.203 627 223
609193e5a0cea97bf00ac6e2_a6zcr 0.202 1133 982
610b0a1bf2434edb31592209_3f1wq 0.202 869 424
5f08583a3d61a604d606c517_o75t7 0.201 720 298
55eab7fd748092000daa98f2_f10fa 0.198 1110 738
612ba4de7e2b127155f4bb03_ph1f8 0.196 867 652
5e04595a4fa02aefdb9c9ced_n3rey 0.189 983 830
61114f10ae21c59c0ed3d106_jw6v8 0.187 711 195
5f14886922a7d20725a22cde_9awyt 0.186 803 397
60a6dd8779e3de1097e5d50a_t4wyc 0.185 846 765
5c73e5d89b46930001ee7edc_ydo84 0.182 1045 1082
5e84f2663a34f20c3907e237_rt0oo 0.181 1001 562
5ebde9baaefecd1325ef23c7_lphsv 0.176 1307 1091
5d59a9d909f4300001de0c3b_l125y 0.175 1146 900
563bb259be9cac0005aab7ab_te1z4 0.174 703 243
5fd97d5b40332e276ea58209_xmckd 0.173 1046 918
60ba6031b6dde7c5b869bf74_gqplc 0.173 616 381
5dfae1f373d7248254527108_0rb1e 0.171 927 550
60b8e0ec34553723e3d6504d_ju18r 0.170 769 312
5d5051e31025380015dc59b8_dwrdh 0.157 848 364
60366cfe9748fc2b0ccbc9d0_ox8hj 0.156 712 383
5bce155e561901000121006f_49472 0.142 1109 863
5ccc3dd7a758ba00133c0763_lwl1g 0.139 895 816
5a0b46e0844c7a00015e4d13_jedw6 0.124 741 331
5eb0205cac7ad4085dc32a50_5xekt 0.092 884 509

Error Rate per Illusion Block

# Per-block error rates, sorted from worst to best
temp <- df |>
  group_by(Participant, Illusion_Type, Block) |>
  summarize(ErrorRate_per_block = sum(Error) / n(), .groups = "drop") |>
  arrange(desc(ErrorRate_per_block))

# Count blocks with chance-level (>= 50%) error rates per illusion and block,
# plus the cumulative number/proportion of trials this represents
# (n * 56 assumes 56 trials per block -- TODO confirm against the design)
temp2 <- temp |>
  filter(ErrorRate_per_block >= 0.5) |>
  count(Illusion_Type, Block) |>
  arrange(desc(n), Illusion_Type) |>
  mutate(
    n_trials = cumsum(n * 56),
    p_trials = n_trials / nrow(df)
  )

# knitr::kable(temp2)

# Left panel: density of per-block error rates by illusion type;
# the dashed vertical line marks chance level (50%).
p1 <- temp |>
  estimate_density(at = c("Illusion_Type", "Block")) |>
  ggplot(aes(x = x, y = y)) +
  geom_line(aes(color = Illusion_Type, linetype = Block)) + 
  geom_vline(xintercept = 0.5, linetype = "dashed") +
  scale_x_continuous(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0)) +
  labs(y = "Distribution", x = "Error Rate") +
  theme_modern()

# Right panel: percentage of all trials contained in the >= 50%-error
# blocks, stacked by block, per illusion type.
p2 <- temp2 |> 
  mutate(Block = fct_rev(Block)) |> 
  ggplot(aes(x = Illusion_Type, y = p_trials)) +
  geom_bar(stat="identity", aes(fill = Block)) +
  scale_x_discrete(expand = c(0, 0)) +
  scale_y_continuous(labels = scales::percent, expand = c(0, 0)) +
  labs(y = "Percentage of Trials Removed", x = "Illusion Type") +
  theme_modern() +
  theme(axis.text.x = element_text(angle=45, hjust = 1))

# Combine side by side (patchwork)
p1 | p2



# Drop every block in which a participant's error rate reached 50%,
# i.e. chance-level performance (instructions likely misunderstood).
df <- df |>
  group_by(Participant, Illusion_Type, Block) |>
  mutate(ErrorRate_per_block = mean(Error)) |>
  ungroup() |>
  filter(ErrorRate_per_block < 0.5) |>
  select(-ErrorRate_per_block)

# The temporary summaries are no longer needed
rm(temp, temp2)

Reaction Time Distribution

# RT distribution: per-participant/per-block densities (normalized within
# participant) overlaid on the grand RT density (grey area). Outlier
# participants are drawn in red, others in blue; faceted by participant.
p <- estimate_density(df, select = "RT", at = c("Participant", "Block")) |>
  group_by(Participant) |>
  normalize(select = "y") |>
  ungroup() |>
  mutate(color = ifelse(Participant %in% outliers, "red", "blue")) |>
  ggplot(aes(x = x, y = y)) +
  geom_area(data = normalize(estimate_density(df, select = "RT"), select = "y"), alpha = 0.2) +
  geom_line(aes(color = color, group = interaction(Participant, Block), linetype = Block)) +
  geom_vline(xintercept = 2500, linetype = "dashed", color = "red") +
  scale_color_manual(values=c("red"="red", "blue"="blue"), guide = "none") +
  scale_x_continuous(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0)) +
  coord_cartesian(xlim = c(0, 3000)) +
  theme_modern() +
  theme(axis.text.y = element_blank()) +
  facet_wrap(~Participant) +
  labs(y = "", x = "Reaction Time (ms)")
p

# ggsave("figures/outliers.png", p, width=20, height=15)

# Filter out the manually flagged outlier participants
df <- filter(df, !Participant %in% outliers)

Reaction Time per Trial

# Per-participant RT densities (normalized) over the grand density; dashed
# red lines mark the trial-level exclusion bounds (150 and 3000 ms).
p1 <- estimate_density(df, select = "RT", at = "Participant") |>
  group_by(Participant) |>
  normalize(select = "y") |>
  ungroup() |>
  ggplot(aes(x = x, y = y)) +
  geom_area(data = normalize(estimate_density(df, select = "RT"), select = "y"), alpha = 0.2) +
  geom_line(aes(color = Participant, group = Participant)) +
  geom_vline(xintercept = c(150, 3000), linetype = "dashed", color = "red") +
  scale_color_material_d("rainbow", guide = "none") +
  scale_x_continuous(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0)) +
  coord_cartesian(xlim = c(0, 3500)) +
  theme_modern() +
  theme(axis.text.y = element_blank()) +
  # facet_wrap(~Participant) +
  labs(y = "", x = "Reaction Time (ms)")


# Flag implausibly fast (< 150 ms) or slow (> 3000 ms) responses
df$Outlier <- df$RT < 150 | df$RT > 3000

# Proportion of flagged trials per participant, sorted ascending
p2 <- df |>
  group_by(Participant) |>
  summarize(Outlier = mean(Outlier)) |>
  mutate(Participant = fct_reorder(Participant, Outlier)) |>
  ggplot(aes(x = Participant, y = Outlier)) +
  geom_bar(stat = "identity", aes(fill = Participant)) +
  scale_fill_material_d("rainbow", guide = "none") +
  scale_x_discrete(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0), labels = scales::percent) +
  see::theme_modern() +
  theme(axis.text.x = element_blank())

# RT densities and per-participant outlier proportions side by side
p1 | p2

We removed 692 (1.37%) outlier trials (RT < 150 ms or RT > 3000 ms).

# Drop the flagged outlier trials (logical negation instead of "== FALSE")
df <- filter(df, !Outlier)

Participants

# One row per participant carrying demographic / technical information
dfsub <- df |>
  select(Participant, Age, Sex, Education, Nationality, Ethnicity, Duration, Break_Duration, Screen_Resolution, Screen_Refresh, Device_OS) |>
  group_by(Participant) |>
  slice_head(n = 1) |>
  ungroup()

The final sample included 46 participants (Mean age = 26.7, SD = 7.7, range: [19, 60]; Sex: 39.1% females, 56.5% males, 4.3% other; Education: Master, 17.39%; Bachelor, 41.30%; High School, 39.13%; Other, 2.17%).

# Density plot of a numeric participant-level variable.
#
# @param dfsub Participant-level data frame.
# @param what Column name (string) of the numeric variable to plot.
# @param title,subtitle Plot title and subtitle (title defaults to `what`).
# @param fill Fill color of the density area.
# @return A ggplot object; the dashed red line marks the variable's mean.
plot_distribution <- function(dfsub, what = "Age", title = what, subtitle = "", fill = "orange") {
  dfsub |>
    # `aes_string()` is deprecated in ggplot2; use the `.data` pronoun instead
    ggplot(aes(x = .data[[what]])) +
    geom_density(fill = fill) +
    geom_vline(xintercept = mean(dfsub[[what]]), color = "red", linetype = "dashed") +
    scale_x_continuous(expand = c(0, 0)) +
    scale_y_continuous(expand = c(0, 0)) +
    ggtitle(title, subtitle = subtitle) +
    theme_modern() +
    theme(
      plot.title = element_text(face = "bold", hjust = 0.5),
      plot.subtitle = element_text(face = "italic", hjust = 0.5),
      axis.title.x = element_blank(),
      axis.title.y = element_blank(),
      axis.text.y = element_blank()
    )
}

# Waffle chart of a categorical participant-level variable (one tile per
# participant), built with the 'ggwaffle' package.
plot_waffle <- function(dfsub, what = "Nationality") {
  waffle_data <- ggwaffle::waffle_iron(dfsub, what)
  ggplot(waffle_data, aes(x, y, fill = group)) +
    ggwaffle::geom_waffle() +
    coord_equal() +
    ggtitle(what) +
    labs(fill = "") +
    theme_void() +
    theme(plot.title = element_text(face = "bold", hjust = 0.5))
}
# Numeric distributions: age, total duration, break duration
p1 <- plot_distribution(dfsub, "Age", fill = "#FF9800")
p2 <- plot_distribution(dfsub, "Duration", title = "Total Duration", subtitle = "in minutes", fill = "#F44336")
p3 <- plot_distribution(dfsub, "Break_Duration", title = "Break Duration", subtitle = "in minutes", fill = "#3F51B5")

# Categorical variables as waffle charts with bespoke palettes
p4 <- plot_waffle(dfsub, "Sex") +
  scale_fill_manual(values = c("Male" = "#2196F3", "Female" = "#E91E63", "Other" = "#FF9800"))

p5 <- plot_waffle(dfsub, "Education") +
  scale_fill_viridis_d()

p6 <- plot_waffle(dfsub, "Nationality") +
  scale_fill_metro_d()

p7 <- plot_waffle(dfsub, "Ethnicity") +
  scale_fill_manual(values = c("Latino" = "#FF5722", "Asian" = "#FF9800", "Caucasian" = "#2196F3", "African" = "#4CAF50", "Jewish" = "#9C27B0"))

p8 <- plot_waffle(dfsub, "Screen_Resolution") +
  scale_fill_pizza_d()

p9 <- plot_waffle(dfsub, "Device_OS") +
  scale_fill_bluebrown_d()

# p10 <- plot_waffle(dfsub, "Screen_Refresh") +
#   scale_fill_viridis_d()


# Assemble a 3x3 grid (patchwork)
(p1 / p2 / p3) | (p4 / p5 / p6) | (p7 / p8 / p9)

Data Analysis

The analysis of study 1 focused on the probability of errors as the main outcome variable. For each illusion, we started by visualizing the average effect of task difficulty and illusion strength to gain some insight on the underlying generative model. Next, we tested the performance of various models differing in their specifications, such as: with or without a log-transform on the task difficulty, with or without a 2nd order polynomial term of the illusion strength, and with or without the illusion side (up vs. down or left vs. right) as an additional predictor. We then fitted the best performing model under a Bayesian framework, and compared its predicted links with that of a Generalized Additive Model (GAM), which has an increased ability of mapping underlying potentially non-linear relationships at the expense of model simplicity. Finally, we used these models to estimate different values of task difficulty that would lead to similar error rates (0.5, 2.5 and 25%) when the illusion strength is null.

Results

Delboeuf

Descriptive

# Descriptive visualization of one illusion's error rates:
# - panel 1: error rate ~ task difficulty, one series per illusion strength;
# - panel 2: error rate ~ illusion strength, one series per difficulty.
# `side` selects whether answers are recoded as Left/Right ("leftright")
# or Up/Down (any other value); panels are faceted by answer.
plot_descriptive <- function(data, side="leftright") {
  
  if(side == "leftright") {
    # Infer which physical answer maps to Illusion_Side == 1 from one
    # correct trial, then label all trials accordingly
    x <- data[data$Error == 0 & data$Illusion_Side == 1, ]$Answer[1]
    x <- tools::toTitleCase(gsub("arrow", "", x))
    if(x == "Left") {
      data$Answer <- ifelse(data$Illusion_Side == 1, "Left", "Right")
    } else if(x == "Right") {
      data$Answer <- ifelse(data$Illusion_Side == 1, "Right", "Left")
    }
  } else {
    # Same inference for vertical (Up/Down) response mappings
    x <- data[data$Error == 0 & data$Illusion_Side == 1, ]$Answer[1]
    x <- tools::toTitleCase(gsub("arrow", "", x))
    if(x == "Up") {
      data$Answer <- ifelse(data$Illusion_Side == 1, "Up", "Down")
    } else if(x == "Down") {
      data$Answer <- ifelse(data$Illusion_Side == 1, "Down", "Up")
    }
    data$Answer <- fct_rev(data$Answer)
  }
  
  # Dodge widths proportional to each x-axis variable's range
  dodge1 <- 0.1 * diff(range(data$Illusion_Difference))
  dodge2 <- -0.1 * diff(range(data$Illusion_Strength))
  
  # One color per unique illusion-strength level (cool-toned ramp)
  colors <- colorRampPalette(c("#4CAF50", "#009688", "#00BCD4", "#2196F3", "#3F51B5", "#673AB7", "#9C27B0"))(length(unique(data$Illusion_Strength)))
  
  # Error rate ~ task difficulty, colored by (rounded) illusion strength
  p1 <- data |> 
    group_by(Illusion_Difference, Illusion_Strength, Answer) |> 
    summarize(Error = mean(Error)) |> 
    mutate(Illusion_Strength = as.factor(round(Illusion_Strength, 2))) |> 
    ggplot(aes(x = Illusion_Difference, y = Error)) +
    geom_bar(aes(fill=Illusion_Strength), position = position_dodge(width=dodge1), stat="identity") +
    geom_line(aes(color = Illusion_Strength), position = position_dodge(width=dodge1)) +
    scale_y_continuous(limits = c(0, 1), expand = c(0, 0), labels = scales::percent) +
    scale_x_continuous(expand = c(0, 0)) +
    scale_color_manual(values = colors) +
    scale_fill_manual(values = colors) +
    theme_modern() +
    labs(
      color = "Illusion Strength", 
      fill = "Illusion Strength",
      y = "Probability of Error",
      x = "Task Difficulty"
    ) +
    theme(plot.title = element_text(face = "bold", hjust = 0.5))
  
  # Red-to-green ramp across (rounded) difficulty levels for panel 2
  colors <- colorRampPalette(c("#F44336", "#FFC107", "#4CAF50"))(length(unique(data$Illusion_Difference)))
    
  # Error rate ~ illusion strength, colored by difficulty; dotted line at 0
  p2 <- data |> 
    group_by(Illusion_Difference, Illusion_Strength, Answer) |> 
    summarize(Error = mean(Error)) |> 
    mutate(Illusion_Difference = as.factor(round(Illusion_Difference, 2))) |> 
    ggplot(aes(x = Illusion_Strength, y = Error)) +
    geom_vline(xintercept=0, linetype="dotted", alpha=0.6) +
    geom_bar(aes(fill=Illusion_Difference), position = position_dodge(width=dodge2), stat="identity") +
    geom_line(aes(color = Illusion_Difference), position = position_dodge(width=dodge2)) +
    scale_y_continuous(limits = c(0, 1), expand = c(0, 0), labels = scales::percent) +
    scale_x_continuous(expand = c(0, 0)) +
    scale_color_manual(values = colors) +
    scale_fill_manual(values = colors) +
    theme_modern() +
    labs(
      color = "Task Difficulty", 
      fill = "Task Difficulty",
      y = "Probability of Error",
      x = "Illusion Strength"
    ) +
    theme(plot.title = element_text(face = "bold", hjust = 0.5)) 
  
  # Stack the panels vertically (left/right answers) or side by side
  # (up/down), with the illusion name as the overall title (patchwork)
  if(side == "leftright") {
    p <- ((p1 + facet_wrap(~Answer, ncol=2, labeller = "label_both")) /
      (p2 + facet_wrap(~Answer, ncol=2, labeller = "label_both"))) + 
    plot_annotation(title = paste(data$Illusion_Type[1], "Illusion"), 
                    theme = theme(plot.title = element_text(face = "bold", hjust = 0.5)))
  } else {
    p <- ((p1 + facet_wrap(~Answer, nrow=2, labeller = "label_both")) |
      (p2 + facet_wrap(~Answer, nrow=2, labeller = "label_both"))) + 
    plot_annotation(title = paste(data$Illusion_Type[1], "Illusion"), 
                    theme = theme(plot.title = element_text(face = "bold", hjust = 0.5)))
  }
  p
}

# Subset to the Delboeuf illusion and visualize descriptives
data <- filter(df, Illusion_Type == "Delboeuf")

plot_descriptive(data)

Model Selection

# Model selection: fit frequentist GLMMs crossing candidate specifications
# (raw vs. log-transformed difficulty, polynomial degree of strength,
# with/without illusion side) and compare the converged ones by BIC,
# marginal R2 and Bayes factors.
best_models <- function(data) {
  models <- list()
  # Loop bounds are scaffolding for extension: currently only degree-1
  # difficulty and degrees 1-2 of strength are explored (k2 has a single,
  # empty variant)
  for(i in 1:1) {
    for(j in 1:2) {
      for(k1 in c("", "_log")) { 
        for(k2 in c("")) {  
          for(side in c("", "-side")) {
            # Human-readable model identifier, e.g. "dif_log1-str2-side"
            name <- paste0("dif", k1, i, "-", "str", k2, j, side)
            print(name)
            # Build the fixed-effects formula string piece by piece
            f <- paste0("poly(Illusion_Difference", 
                        k1,
                        ", ",
                        i,
                        ") * poly(Illusion_Strength",
                        k2, 
                        ", ",
                        j, 
                        ") + (1|Participant)")
            
            if(side == "-side") f <- paste0("Illusion_Side * ", f)
            
            m <- glmmTMB::glmmTMB(as.formula(paste0("Error ~ ", f)), 
                                  data = data, family = "binomial")
            # Keep only models that converged
            if(performance::check_convergence(m)) {
              models[[name]] <- m
            }
          }
        }
      }
    }
  }

  # Retain the 10 best models by BIC...
  to_keep <- compare_performance(models, metrics = c("BIC")) |> 
    arrange(BIC) |> 
    slice(1:10) |> 
    pull(Name)
  
  
  # ...and report BIC, marginal R2, and BFs relative to the first model
  test <- test_performance(models[to_keep], reference=1)
  perf <- compare_performance(models[to_keep], metrics = c("BIC", "R2")) 
  
  merge(perf, test) |> 
    arrange(BIC) |> 
    select(Name, BIC, R2_marginal, BF) |> 
    mutate(BF = insight::format_bf(BF, name=""))
}

best_models(data)

Model Visualization

# Winning specification from the model comparison: log task difficulty
# crossed with a 2nd-order polynomial of illusion strength, random
# intercepts per participant; binary errors -> Bernoulli likelihood.
formula <- brms::bf(
  Error ~ log(Illusion_Difference) * poly(Illusion_Strength, 2) + 
    (1 | Participant),
  family = "bernoulli"
)

# Fit with brms / Stan (MCMC sampling; per-iteration progress suppressed)
model <- brms::brm(formula,
  data = data,
  algorithm = "sampling",
  refresh = 0
)
## Running MCMC with 4 parallel chains...
## 
## Chain 4 finished in 9.8 seconds.
## Chain 1 finished in 10.1 seconds.
## Chain 2 finished in 10.0 seconds.
## Chain 3 finished in 10.3 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 10.1 seconds.
## Total execution time: 10.5 seconds.

# Posterior summaries (median, 95% CI, pd, ROPE, Rhat, ESS)
parameters::parameters(model)
## Parameter                                      | Median |          95% CI |     pd | % in ROPE |  Rhat |     ESS
## ----------------------------------------------------------------------------------------------------------------
## (Intercept)                                    |  -3.78 | [ -4.09, -3.49] |   100% |        0% | 1.000 | 1996.00
## logIllusion_Difference                         |  -2.53 | [ -2.77, -2.30] |   100% |        0% | 0.999 | 5496.00
## polyIllusion_Strength21                        |  53.41 | [ 38.06, 68.91] |   100% |        0% | 1.000 | 2631.00
## polyIllusion_Strength22                        |  23.92 | [  9.61, 38.27] | 99.98% |        0% | 1.000 | 3510.00
## logIllusion_Difference:polyIllusion_Strength21 | -15.23 | [-31.81,  0.07] | 97.45% |     0.16% | 1.000 | 2892.00
## logIllusion_Difference:polyIllusion_Strength22 |  10.59 | [ -4.90, 26.32] | 91.60% |     0.50% | 1.000 | 3978.00
# Visualize model predictions: error probability ~ illusion strength, one
# colored line (with credible ribbon) per difficulty level, with the raw
# trials drawn as dots hanging from the top (errors) / rising from the
# bottom (correct responses).
plot_model <- function(data, model) {
  # Errors are stacked downwards from y = 1, correct trials upwards from y = 0
  data <- mutate(data, .dots_side = ifelse(Error == 1, "bottom", "top"))
  
  n_lines <- 5
  # Predictions over a 25 x 5 grid of strength x difficulty values
  pred <- estimate_relation(model,
                            at = c("Illusion_Strength", "Illusion_Difference"),
                            length = c(25, n_lines)) |>
    mutate(Illusion_Difference = as.factor(round(Illusion_Difference, 2)))
  
  # Set colors for lines
  colors <- colorRampPalette(c("#F44336", "#FFC107", "#4CAF50"))(n_lines)
  diffvals <- as.numeric(as.character(unique(sort(pred$Illusion_Difference))))
  names(colors) <- diffvals
  
  # Assign color from the same palette to every observation of data (for
  # geom_dots): each trial takes the color of the closest predicted level
  closest <- diffvals[max.col(-abs(outer(data$Illusion_Difference, diffvals, "-")))]
  data$color <- colors[as.character(closest)]
  data$color <- fct_reorder(data$color, closest)
  
  # Manual jittering: pull positive/negative strengths slightly towards
  # zero and spread near-zero strengths symmetrically, to reduce overlap
  xrange <- 0.05*diff(range(data$Illusion_Strength))
  data$x <- data$Illusion_Strength 
  data$x[data$x > 0] <- data$x[data$x > 0] - runif(sum(data$x > 0), 0, xrange)
  data$x[data$x < 0] <- data$x[data$x < 0] + runif(sum(data$x < 0), 0, xrange)
  data$x[round(data$x, 2) == 0] <- data$x[round(data$x, 2) == 0] + runif(sum(round(data$x, 2) == 0), -xrange/2, xrange/2)
  
  
  pred |>
    ggplot(aes(x = Illusion_Strength, y = Predicted)) +
    # NOTE(review): `fill = data$color` uses the outer data while the geom's
    # data is a mutated copy -- same rows, so the pairing holds
    geom_dots(
      data = mutate(data, 
                    Illusion_Strength = round(Illusion_Strength, 1)),
      aes(x=x, y = Error, group = Error, side = .dots_side, order=color), 
      fill = data$color,
      color = NA, 
      alpha=0.5) +
    geom_slab(data=data, aes(y = Error)) +
    geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Illusion_Difference, group = Illusion_Difference), alpha = 0.2) +
    geom_vline(xintercept = 0, linetype = "dashed") +
    geom_hline(yintercept = c(0.05, 0.5, 0.95), linetype = "dotted", alpha = 0.5) +
    geom_line(aes(color = Illusion_Difference, group = Illusion_Difference)) +
    scale_y_continuous(limits = c(0, 1), expand = c(0, 0), labels = scales::percent) +
    scale_x_continuous(expand = c(0, 0)) +
    scale_color_manual(values = colors) +
    scale_fill_manual(values = colors) +
    coord_cartesian(xlim=c(min(data$Illusion_Strength), max(data$Illusion_Strength))) +
    theme_modern() +
    labs(
      title = paste0(data$Illusion_Type[1], " Illusion"),
      color = "Difficulty", fill = "Difficulty",
      y = "Probability of Error",
      x = "Illusion Strength"
    ) +
    theme(plot.title = element_text(face = "bold", hjust = 0.5))
}

plot_model(data, model)

GAM

# Fit a Bayesian GAM (tensor-product smooth of difficulty x strength with
# cubic regression bases, k = 4) with participant random intercepts, and
# return both the fitted model and its visualization.
make_gam <- function(data) {
  f <- brms::bf(
    Error ~ t2(Illusion_Difference, Illusion_Strength, bs = "cr", k=4) + 
      (1 | Participant),
    family = "bernoulli"
  )

  fit <- brms::brm(f, data = data, refresh = 0)

  list(p = plot_model(data, fit), model = fit)
}

# Fit the GAM counterpart for comparison with the parametric model
gam <- make_gam(data)
## Running MCMC with 4 parallel chains...
## 
## Chain 3 finished in 28.4 seconds.
## Chain 4 finished in 33.9 seconds.
## Chain 1 finished in 34.4 seconds.
## Chain 2 finished in 35.7 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 33.1 seconds.
## Total execution time: 35.9 seconds.
# Display the GAM-based visualization
gam$p

Parameter Standardization

# Find the task-difficulty values that yield error rates closest to 0.5%,
# 2.5% and 25% when the illusion strength is null (0).
#
# @param model A fitted model accepted by `estimate_relation()`.
# @param min,max Range of Illusion_Difference over which to search
#   (500-point grid).
# @return A 3-row data frame with the difficulty values and the
#   corresponding (formatted) predicted error rates.
std_params <- function(model, min = 0, max = 2) {
  estimate_relation(
    model,
    at = list(
      Illusion_Strength = c(0),
      Illusion_Difference = seq(min, max, length.out = 500)
    )  # empty trailing argument removed (was passing a missing arg)
  ) |>
    select(Illusion_Strength, Illusion_Difference, Error = Predicted) |>
    # Grid rows whose predicted error is closest to each target rate
    slice(c(
      which.min(abs(Error - 0.005)),
      which.min(abs(Error - 0.025)),
      which.min(abs(Error - 0.25))
    )) |>
    mutate(Error = insight::format_value(Error, as_percent = TRUE))
}

# range(data$Illusion_Difference)
# range(data$Illusion_Strength)

# Difficulty thresholds for the parametric (logistic) model
std_params(model, min=0, max=2)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |                1.54 |  0.50%
## 0.00              |                0.83 |  2.51%
## 0.00              |                0.32 | 24.70%
# Same thresholds derived from the GAM
std_params(gam$model, min=0, max=2)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |                1.19 |  0.50%
## 0.00              |                0.77 |  2.50%
## 0.00              |                0.33 | 24.96%

Ebbinghaus

Descriptive

# Subset to the Ebbinghaus illusion and repeat the analysis pipeline
data <- filter(df, Illusion_Type == "Ebbinghaus")

plot_descriptive(data)

Model Selection

best_models(data)

Model Visualization

# Best-performing specification for Ebbinghaus (same form as Delboeuf)
formula <- brms::bf(
  Error ~ log(Illusion_Difference) * poly(Illusion_Strength, 2) + 
    (1 | Participant),
  family = "bernoulli"
)

model <- brms::brm(formula,
  data = data,
  refresh = 0
)
## Running MCMC with 4 parallel chains...
## 
## Chain 4 finished in 9.8 seconds.
## Chain 3 finished in 10.1 seconds.
## Chain 1 finished in 10.8 seconds.
## Chain 2 finished in 11.4 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 10.5 seconds.
## Total execution time: 11.5 seconds.

# Posterior parameter summary
parameters::parameters(model)
## Parameter                                      |  Median |            95% CI |     pd | % in ROPE |  Rhat |     ESS
## -------------------------------------------------------------------------------------------------------------------
## (Intercept)                                    |   -4.94 | [  -5.40,  -4.53] |   100% |        0% | 1.001 | 1392.00
## logIllusion_Difference                         |   -3.57 | [  -3.94,  -3.21] |   100% |        0% | 1.001 | 2542.00
## polyIllusion_Strength21                        |   45.08 | [  22.66,  69.97] |   100% |        0% | 1.004 | 1335.00
## polyIllusion_Strength22                        |   28.34 | [   8.53,  47.82] | 99.55% |        0% | 1.003 | 2353.00
## logIllusion_Difference:polyIllusion_Strength21 | -117.11 | [-143.16, -90.83] |   100% |        0% | 1.004 | 1648.00
## logIllusion_Difference:polyIllusion_Strength22 |   -9.89 | [ -33.77,  14.11] | 81.25% |     0.47% | 1.003 | 2533.00
# Visualize model predictions against the raw trials
plot_model(data, model)

GAM

# GAM counterpart for comparison
gam <- make_gam(data)
## Running MCMC with 4 parallel chains...
## 
## Chain 4 finished in 30.4 seconds.
## Chain 2 finished in 30.6 seconds.
## Chain 3 finished in 31.1 seconds.
## Chain 1 finished in 31.5 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 30.9 seconds.
## Total execution time: 31.7 seconds.
# GAM-based visualization
gam$p

Parameter Standardization

# range(data$Illusion_Difference)
# range(data$Illusion_Strength)

# Difficulty thresholds (parametric model)
std_params(model, min=0, max=2)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |                0.98 |  0.50%
## 0.00              |                0.61 |  2.48%
## 0.00              |                0.28 | 24.77%
# Difficulty thresholds (GAM)
std_params(gam$model, min=0, max=2)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |                0.94 |  0.80%
## 0.00              |                0.56 |  2.49%
## 0.00              |                0.28 | 25.09%

Rod and Frame

Descriptive

# Subset to the Rod-and-Frame illusion
data <- filter(df, Illusion_Type == "Rod-Frame")

plot_descriptive(data)


# Restrict to moderate strengths (|strength| < 15) before modeling
data <- filter(data, abs(Illusion_Strength) < 15)

plot_descriptive(data)

Model Selection

best_models(data)

Model Visualization

# For Rod-and-Frame, the selected model uses a linear strength term
formula <- brms::bf(
  Error ~ log(Illusion_Difference) * Illusion_Strength + 
    (1 | Participant),
  family = "bernoulli"
)

model <- brms::brm(formula,
  data = data,
  refresh = 0
)
## Running MCMC with 4 parallel chains...
## 
## Chain 2 finished in 12.2 seconds.
## Chain 3 finished in 12.6 seconds.
## Chain 1 finished in 13.2 seconds.
## Chain 4 finished in 13.4 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 12.8 seconds.
## Total execution time: 13.6 seconds.

# Posterior parameter summary
parameters::parameters(model)
## Parameter                                |    Median |         95% CI |     pd | % in ROPE |  Rhat |     ESS
## ------------------------------------------------------------------------------------------------------------
## (Intercept)                              |     -2.24 | [-2.63, -1.89] |   100% |        0% | 1.006 |  735.00
## logIllusion_Difference                   |     -0.61 | [-0.67, -0.54] |   100% |        0% | 1.000 | 4180.00
## Illusion_Strength                        |      0.15 | [ 0.13,  0.16] |   100% |        0% | 1.000 | 4285.00
## logIllusion_Difference:Illusion_Strength | -3.19e-03 | [-0.01,  0.00] | 82.80% |      100% | 0.999 | 6201.00
# Visualize model predictions against the raw trials
plot_model(data, model)

GAM

# GAM counterpart for comparison
gam <- make_gam(data)
## Running MCMC with 4 parallel chains...
## 
## Chain 3 finished in 19.2 seconds.
## Chain 2 finished in 19.3 seconds.
## Chain 1 finished in 19.8 seconds.
## Chain 4 finished in 20.7 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 19.7 seconds.
## Total execution time: 20.9 seconds.
# GAM-based visualization
gam$p

Parameter Standardization

# range(data$Illusion_Difference)
# range(data$Illusion_Strength)

# Difficulty thresholds (parametric model); search range adapted to this task
std_params(model, min=0.01, max=12)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |               12.00 |  2.35%
## 0.00              |               10.82 |  2.50%
## 0.00              |                0.15 | 24.91%
# Difficulty thresholds (GAM)
std_params(gam$model, min=0.01, max=12)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |               12.00 |  0.80%
## 0.00              |                4.62 |  2.50%
## 0.00              |                0.56 | 24.84%

Vertical-Horizontal

Descriptive

# Subset to the Vertical-Horizontal illusion
data <- filter(df, Illusion_Type == "Vertical-Horizontal")

plot_descriptive(data)


# Restrict to moderate strengths (|strength| < 90) before modeling
data <- filter(data, abs(Illusion_Strength) < 90)

plot_descriptive(data)

Model Selection

best_models(data)

Model Visualization

# Selected specification: log difficulty x 2nd-order polynomial of strength
formula <- brms::bf(
  Error ~ log(Illusion_Difference) * poly(Illusion_Strength, 2) + 
    (1 | Participant),
  family = "bernoulli"
)

model <- brms::brm(formula,
  data = data,
  refresh = 0
)
## Running MCMC with 4 parallel chains...
## 
## Chain 1 finished in 21.6 seconds.
## Chain 4 finished in 23.0 seconds.
## Chain 3 finished in 23.1 seconds.
## Chain 2 finished in 24.9 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 23.2 seconds.
## Total execution time: 25.1 seconds.

# Posterior parameter summary
parameters::parameters(model)
## Parameter                                      | Median |            95% CI |     pd | % in ROPE |  Rhat |     ESS
## ------------------------------------------------------------------------------------------------------------------
## (Intercept)                                    |  -6.91 | [  -8.32,  -5.72] |   100% |        0% | 1.002 | 1094.00
## logIllusion_Difference                         |  -1.76 | [  -2.19,  -1.38] |   100% |        0% | 1.002 | 1112.00
## polyIllusion_Strength21                        | 181.63 | [  98.92, 276.63] |   100% |        0% | 1.003 | 1092.00
## polyIllusion_Strength22                        | -57.80 | [-107.17, -12.90] | 99.50% |        0% | 1.003 | 1136.00
## logIllusion_Difference:polyIllusion_Strength21 |  -1.61 | [ -28.86,  28.22] | 54.52% |     0.53% | 1.002 | 1113.00
## logIllusion_Difference:polyIllusion_Strength22 | -13.35 | [ -29.62,   2.91] | 94.17% |     0.29% | 1.002 | 1257.00
# Visualize model predictions against the raw trials
plot_model(data, model)

GAM

# GAM counterpart for comparison
gam <- make_gam(data)
## Running MCMC with 4 parallel chains...
## 
## Chain 2 finished in 25.1 seconds.
## Chain 4 finished in 25.2 seconds.
## Chain 3 finished in 25.6 seconds.
## Chain 1 finished in 25.9 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 25.5 seconds.
## Total execution time: 26.0 seconds.
# GAM-based visualization
gam$p

Parameter Standardization

# range(data$Illusion_Difference)
# range(data$Illusion_Strength)

# Difficulty thresholds (parametric model); search range adapted to this task
std_params(model, min=0, max=0.40)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |                0.40 |  1.25%
## 0.00              |                0.25 |  2.50%
## 0.00              |                0.04 | 24.82%
# Difficulty thresholds (GAM)
std_params(gam$model, min=0, max=0.40)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |                0.31 |  0.50%
## 0.00              |                0.20 |  2.50%
## 0.00              |                0.05 | 24.94%

Zöllner

Descriptive

# Subset to the Zöllner illusion
data <- filter(df, Illusion_Type == "Zöllner")

plot_descriptive(data)


# Restrict to moderate strengths (|strength| < 45) before modeling
data <- filter(data, abs(Illusion_Strength) < 45)

plot_descriptive(data)

Model Selection

best_models(data)

Model Visualization

# For Zöllner, the selected model uses a linear strength term
formula <- brms::bf(
  Error ~ log(Illusion_Difference) * Illusion_Strength + 
    (1 | Participant),
  family = "bernoulli"
)

model <- brms::brm(formula,
  data = data,
  refresh = 0
)
## Running MCMC with 4 parallel chains...
## 
## Chain 2 finished in 10.7 seconds.
## Chain 3 finished in 10.8 seconds.
## Chain 4 finished in 12.6 seconds.
## Chain 1 finished in 13.4 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 11.9 seconds.
## Total execution time: 13.4 seconds.

# Posterior summaries of the fixed effects (median, 95% CI, pd, ROPE, Rhat, ESS).
parameters::parameters(model)
## Parameter                                |    Median |         95% CI |     pd | % in ROPE |  Rhat |     ESS
## ------------------------------------------------------------------------------------------------------------
## (Intercept)                              |     -2.43 | [-2.71, -2.16] |   100% |        0% | 0.999 | 3869.00
## logIllusion_Difference                   |     -1.11 | [-1.26, -0.97] |   100% |        0% | 1.000 | 5131.00
## Illusion_Strength                        |      0.03 | [ 0.02,  0.04] |   100% |      100% | 0.999 | 5653.00
## logIllusion_Difference:Illusion_Strength | -7.10e-03 | [-0.01,  0.00] | 99.05% |      100% | 1.000 | 6267.00
# Visualize the model's predictions (project helper).
plot_model(data, model)

Parameter Standardization

# Checks used to choose the min/max bounds below:
# range(data$Illusion_Difference)
# range(data$Illusion_Strength)

# Derive the Illusion_Difference values yielding target error rates over the
# [0, 5] illusion-strength range (project helper; calibrates the next study).
std_params(model, min=0, max=5)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |                5.00 |  1.48%
## 0.00              |                3.08 |  2.50%
## 0.00              |                0.30 | 25.20%

White

Descriptive

# Subset the trials of the White illusion and show descriptive plots.
data <- df |>
  filter(Illusion_Type == "White")

plot_descriptive(data)

Model Selection

# Compare candidate model specifications (project helper; presumably ranks
# difficulty/strength parameterizations by BIC and Bayes factor).
best_models(data)

Model Visualization

# Bayesian mixed-effects logistic model: error probability as a function of
# the log task difficulty, a second-degree polynomial of illusion strength,
# and their interaction, with a random intercept per participant.
formula <- brms::bf(
  Error ~ log(Illusion_Difference) * poly(Illusion_Strength, 2) +
    (1 | Participant),
  family = "bernoulli"
)

model <- brms::brm(
  formula = formula,
  data = data,
  refresh = 0  # suppress per-iteration progress output
)
## Running MCMC with 4 parallel chains...
## 
## Chain 4 finished in 77.3 seconds.
## Chain 1 finished in 90.7 seconds.
## Chain 3 finished in 93.3 seconds.
## Chain 2 finished in 101.1 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 90.6 seconds.
## Total execution time: 101.3 seconds.

# Posterior summaries of the fixed effects (median, 95% CI, pd, ROPE, Rhat, ESS).
parameters::parameters(model)
## Parameter                                      |  Median |             95% CI |   pd | % in ROPE |  Rhat |    ESS
## -----------------------------------------------------------------------------------------------------------------
## (Intercept)                                    |  -10.13 | [ -12.94,   -7.54] | 100% |        0% | 0.999 | 916.00
## logIllusion_Difference                         |    2.11 | [   1.18,    3.05] | 100% |        0% | 0.999 | 993.00
## polyIllusion_Strength21                        | 1332.84 | [1127.00, 1554.84] | 100% |        0% | 1.000 | 923.00
## polyIllusion_Strength22                        | -598.45 | [-722.63, -483.16] | 100% |        0% | 1.000 | 944.00
## logIllusion_Difference:polyIllusion_Strength21 | -402.74 | [-480.52, -329.65] | 100% |        0% | 1.000 | 941.00
## logIllusion_Difference:polyIllusion_Strength22 |  222.57 | [ 180.21,  267.04] | 100% |        0% | 1.000 | 959.00
# Visualize the model's predictions (project helper).
plot_model(data, model)

GAM

# Fit a GAM counterpart of the parametric model as a robustness check
# (make_gam() is a project helper).
gam <- make_gam(data)
## Running MCMC with 4 parallel chains...
## 
## Chain 3 finished in 47.0 seconds.
## Chain 1 finished in 48.2 seconds.
## Chain 2 finished in 51.7 seconds.
## Chain 4 finished in 56.3 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 50.8 seconds.
## Total execution time: 56.5 seconds.
# Display the GAM's plot component.
gam$p

Parameter Standardization

# Checks used to choose the min/max bounds below:
# range(data$Illusion_Difference)
# range(data$Illusion_Strength)
# unique(data$Illusion_Strength)

# Derive the Illusion_Difference values yielding target error rates over the
# [0, 20] illusion-strength range (project helper; calibrates the next study).
std_params(model, min=0, max=20)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |               20.00 |  0.75%
## 0.00              |                9.38 |  2.49%
## 0.00              |                1.96 | 24.84%
# Same derivation based on the GAM fit.
std_params(gam$model, min=0, max=20)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |               10.90 |  0.50%
## 0.00              |                6.01 |  2.50%
## 0.00              |                0.44 | 24.90%

Ponzo

Descriptive

# Subset the trials of the Ponzo illusion and show descriptive plots.
data <- df |>
  filter(Illusion_Type == "Ponzo")

plot_descriptive(data, side = "updown")

Model Selection

# Compare candidate model specifications (project helper; presumably ranks
# difficulty/strength parameterizations by BIC and Bayes factor).
best_models(data)

Model Visualization

# Bayesian mixed-effects logistic model: error probability as a function of
# the log task difficulty, a second-degree polynomial of illusion strength,
# and their interaction, with a random intercept per participant.
formula <- brms::bf(
  Error ~ log(Illusion_Difference) * poly(Illusion_Strength, 2) +
    (1 | Participant),
  family = "bernoulli"
)

model <- brms::brm(
  formula = formula,
  data = data,
  refresh = 0  # suppress per-iteration progress output
)
## Running MCMC with 4 parallel chains...
## 
## Chain 3 finished in 15.1 seconds.
## Chain 2 finished in 15.4 seconds.
## Chain 4 finished in 15.8 seconds.
## Chain 1 finished in 16.1 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 15.6 seconds.
## Total execution time: 16.1 seconds.

# Posterior summaries of the fixed effects (median, 95% CI, pd, ROPE, Rhat, ESS).
parameters::parameters(model)
## Parameter                                      | Median |           95% CI |     pd | % in ROPE |  Rhat |     ESS
## -----------------------------------------------------------------------------------------------------------------
## (Intercept)                                    |  -5.21 | [ -5.73,  -4.74] |   100% |        0% | 1.001 | 2302.00
## logIllusion_Difference                         |  -1.34 | [ -1.52,  -1.15] |   100% |        0% | 1.000 | 3544.00
## polyIllusion_Strength21                        |  37.78 | [  9.36,  68.74] | 99.65% |        0% | 1.001 | 2624.00
## polyIllusion_Strength22                        |  77.95 | [ 53.34, 101.36] |   100% |        0% | 1.001 | 2692.00
## logIllusion_Difference:polyIllusion_Strength21 | -49.31 | [-63.62, -35.92] |   100% |        0% | 1.002 | 2624.00
## logIllusion_Difference:polyIllusion_Strength22 |  23.97 | [ 12.36,  35.05] |   100% |        0% | 1.001 | 2664.00
# Visualize the model's predictions (project helper).
plot_model(data, model)

GAM

# Fit a GAM counterpart of the parametric model as a robustness check
# (make_gam() is a project helper).
gam <- make_gam(data)
## Running MCMC with 4 parallel chains...
## 
## Chain 1 finished in 31.3 seconds.
## Chain 2 finished in 32.3 seconds.
## Chain 3 finished in 32.6 seconds.
## Chain 4 finished in 33.1 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 32.3 seconds.
## Total execution time: 33.3 seconds.
# Display the GAM's plot component.
gam$p

Parameter Standardization

# Checks used to choose the min/max bounds below:
# range(data$Illusion_Difference)
# range(data$Illusion_Strength)

# Derive the Illusion_Difference values yielding target error rates over the
# [0, 20] illusion-strength range (project helper; calibrates the next study).
std_params(model, min=0, max=20)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |                0.52 |  0.49%
## 0.00              |                0.20 |  2.44%
## 0.00              |                0.04 | 28.46%
# Same derivation based on the GAM fit.
std_params(gam$model, min=0, max=20)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |                3.57 |  0.50%
## 0.00              |                0.20 |  2.69%
## 0.00              |                0.04 | 27.39%

Müller-Lyer

Descriptive

# Subset the trials of the Müller-Lyer illusion and show descriptive plots.
data <- df |>
  filter(Illusion_Type == "Müller-Lyer")

plot_descriptive(data, side = "updown")

Model Selection

# Compare candidate model specifications (project helper): fits the listed
# difficulty/strength parameterizations and ranks them by BIC, marginal R2,
# and Bayes factor relative to the best model.
best_models(data)
## [1] "dif1-str1"
## [1] "dif1-str1-side"
## [1] "dif_log1-str1"
## [1] "dif_log1-str1-side"
## [1] "dif1-str2"
## [1] "dif1-str2-side"
## [1] "dif_log1-str2"
## [1] "dif_log1-str2-side"
## Random effect variances not available. Returned R2 does not account for random effects.
## Random effect variances not available. Returned R2 does not account for random effects.
##                 Name  BIC R2_marginal       BF
## 1      dif_log1-str2 2415       0.798         
## 2          dif1-str2 2417       0.799  = 0.517
## 3     dif1-str2-side 2505       0.829  < 0.001
## 4 dif_log1-str1-side 2534       0.748  < 0.001

Model Visualization

# Bayesian mixed-effects logistic model: error probability as a function of
# the log task difficulty, a second-degree polynomial of illusion strength,
# and their interaction, with a random intercept per participant.
formula <- brms::bf(
  Error ~ log(Illusion_Difference) * poly(Illusion_Strength, 2) +
    (1 | Participant),
  family = "bernoulli"
)

model <- brms::brm(
  formula = formula,
  data = data,
  refresh = 0  # suppress per-iteration progress output
)
## Running MCMC with 4 parallel chains...
## 
## Chain 1 finished in 18.2 seconds.
## Chain 4 finished in 18.6 seconds.
## Chain 2 finished in 19.7 seconds.
## Chain 3 finished in 22.1 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 19.7 seconds.
## Total execution time: 22.3 seconds.

# Posterior summaries of the fixed effects (median, 95% CI, pd, ROPE, Rhat, ESS).
parameters::parameters(model)
## Parameter                                      |  Median |            95% CI |     pd | % in ROPE |  Rhat |     ESS
## -------------------------------------------------------------------------------------------------------------------
## (Intercept)                                    |   -4.04 | [  -4.67,  -3.45] |   100% |        0% | 1.001 | 1329.00
## logIllusion_Difference                         |   -0.72 | [  -1.11,  -0.31] | 99.98% |        0% | 1.003 | 1166.00
## polyIllusion_Strength21                        |   28.52 | [ -14.97,  76.58] | 89.33% |     0.13% | 1.002 | 1415.00
## polyIllusion_Strength22                        |   33.69 | [   3.14,  64.07] | 98.32% |        0% | 1.002 | 1295.00
## logIllusion_Difference:polyIllusion_Strength21 | -130.20 | [-163.58, -99.95] |   100% |        0% | 1.002 | 1228.00
## logIllusion_Difference:polyIllusion_Strength22 |   32.36 | [  10.27,  56.59] | 99.90% |        0% | 1.002 | 1195.00
# Visualize the model's predictions (project helper).
plot_model(data, model)

GAM

# Fit a GAM counterpart of the parametric model as a robustness check
# (make_gam() is a project helper).
gam <- make_gam(data)
## Running MCMC with 4 parallel chains...
## 
## Chain 1 finished in 33.8 seconds.
## Chain 4 finished in 35.1 seconds.
## Chain 3 finished in 35.4 seconds.
## Chain 2 finished in 37.2 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 35.4 seconds.
## Total execution time: 37.3 seconds.
# Display the GAM's plot component.
gam$p

Parameter Standardization

# Checks used to choose the min/max bounds below:
# range(data$Illusion_Difference)
# range(data$Illusion_Strength)

# Derive the Illusion_Difference values yielding target error rates over the
# [0, 0.6] illusion-strength range (project helper; calibrates the next study).
std_params(model, min=0, max=0.6)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |                0.60 |  1.92%
## 0.00              |                0.48 |  2.50%
## 0.00              |                0.06 | 25.17%
# Same derivation based on the GAM fit.
std_params(gam$model, min=0, max=0.6)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |                0.47 |  0.50%
## 0.00              |                0.26 |  2.51%
## 0.00              |                0.03 | 24.93%

Poggendorff

Descriptive

# Subset the trials of the Poggendorff illusion and show descriptive plots.
data <- df |>
  filter(Illusion_Type == "Poggendorff")

plot_descriptive(data, side = "updown")

# Drop extreme illusion-strength trials (|strength| >= 45) and re-plot.
data <- data |>
  filter(abs(Illusion_Strength) < 45)

plot_descriptive(data, side = "updown")

Model Selection

# Compare candidate model specifications (project helper; presumably ranks
# difficulty/strength parameterizations by BIC and Bayes factor).
best_models(data)

Model Visualization

# Bayesian mixed-effects logistic model: error probability as a function of
# the log task difficulty, a second-degree polynomial of illusion strength,
# and their interaction, with a random intercept per participant.
formula <- brms::bf(
  Error ~ log(Illusion_Difference) * poly(Illusion_Strength, 2) +
    (1 | Participant),
  family = "bernoulli"
)

model <- brms::brm(
  formula = formula,
  data = data,
  refresh = 0  # suppress per-iteration progress output
)
## Running MCMC with 4 parallel chains...
## 
## Chain 1 finished in 9.6 seconds.
## Chain 2 finished in 10.3 seconds.
## Chain 4 finished in 10.4 seconds.
## Chain 3 finished in 10.6 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 10.2 seconds.
## Total execution time: 10.8 seconds.

# Posterior summaries of the fixed effects (median, 95% CI, pd, ROPE, Rhat, ESS).
parameters::parameters(model)
## Parameter                                      | Median |           95% CI |     pd | % in ROPE |  Rhat |     ESS
## -----------------------------------------------------------------------------------------------------------------
## (Intercept)                                    |  -4.79 | [ -5.39,  -4.24] |   100% |        0% | 1.001 | 1717.00
## logIllusion_Difference                         |  -1.03 | [ -1.21,  -0.86] |   100% |        0% | 1.001 | 3508.00
## polyIllusion_Strength21                        |  27.55 | [  2.73,  54.88] | 98.35% |        0% | 1.000 | 2775.00
## polyIllusion_Strength22                        |  27.98 | [  4.73,  50.87] | 99.00% |        0% | 1.000 | 2590.00
## logIllusion_Difference:polyIllusion_Strength21 | -31.08 | [-41.95, -20.41] |   100% |        0% | 1.000 | 2819.00
## logIllusion_Difference:polyIllusion_Strength22 |  -3.11 | [-12.52,   6.22] | 73.25% |     1.50% | 1.001 | 2634.00
# Visualize the model's predictions (project helper).
plot_model(data, model)

GAM

# Fit a GAM counterpart of the parametric model as a robustness check
# (make_gam() is a project helper).
gam <- make_gam(data)
## Running MCMC with 4 parallel chains...
## 
## Chain 1 finished in 18.5 seconds.
## Chain 4 finished in 19.5 seconds.
## Chain 2 finished in 20.2 seconds.
## Chain 3 finished in 20.2 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 19.6 seconds.
## Total execution time: 20.4 seconds.
# Display the GAM's plot component.
gam$p

Parameter Standardization

# Checks used to choose the min/max bounds below:
# range(data$Illusion_Difference)
# range(data$Illusion_Strength)

# Derive the Illusion_Difference values yielding target error rates over the
# [0, 0.5] illusion-strength range (project helper; calibrates the next study).
std_params(model, min=0, max=0.5)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |                0.50 |  0.96%
## 0.00              |                0.18 |  2.50%
## 0.00              |                0.01 | 24.86%
# Same derivation based on the GAM fit.
std_params(gam$model, min=0, max=0.5)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |                0.27 |  1.69%
## 0.00              |                0.43 |  2.50%
## 0.00              |                0.00 | 10.18%

Contrast

Descriptive

# Subset the trials of the Contrast illusion and show descriptive plots.
data <- df |>
  filter(Illusion_Type == "Contrast")

plot_descriptive(data, side = "updown")

Model Selection

# Compare candidate model specifications (project helper; presumably ranks
# difficulty/strength parameterizations by BIC and Bayes factor).
best_models(data)

Model Visualization

# Bayesian mixed-effects logistic model: error probability as a function of
# the (untransformed, unlike the other illusions) task difficulty, a
# second-degree polynomial of illusion strength, and their interaction,
# with a random intercept per participant.
formula <- brms::bf(
  Error ~ Illusion_Difference * poly(Illusion_Strength, 2) +
    (1 | Participant),
  family = "bernoulli"
)

model <- brms::brm(
  formula = formula,
  data = data,
  refresh = 0  # suppress per-iteration progress output
)
## Running MCMC with 4 parallel chains...
## 
## Chain 3 finished in 22.5 seconds.
## Chain 1 finished in 24.0 seconds.
## Chain 4 finished in 24.7 seconds.
## Chain 2 finished in 24.9 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 24.0 seconds.
## Total execution time: 25.0 seconds.
# Visualize the fitted model's predictions against the observed data
# (project helper).
plot_model(data, model)

GAM

# Fit a GAM counterpart of the parametric model as a robustness check
# (make_gam() is a project helper).
gam <- make_gam(data)
## Running MCMC with 4 parallel chains...
## 
## Chain 2 finished in 33.2 seconds.
## Chain 3 finished in 33.3 seconds.
## Chain 4 finished in 33.5 seconds.
## Chain 1 finished in 37.1 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 34.3 seconds.
## Total execution time: 37.3 seconds.
# Display the GAM's plot component.
gam$p

Parameter Standardization

# Checks used to choose the min/max bounds below:
# range(data$Illusion_Difference)
# range(data$Illusion_Strength)

# Derive the Illusion_Difference values yielding target error rates over the
# [0, 25] illusion-strength range (project helper; calibrates the next study).
std_params(model, min=0, max=25)
## Illusion_Strength | Illusion_Difference |  Error
## ------------------------------------------------
## 0.00              |               19.69 |  0.50%
## 0.00              |               12.32 |  2.49%
## 0.00              |                0.80 | 25.08%
# Same derivation based on the GAM fit.
std_params(gam$model, min=0, max=25)
## Illusion_Strength | Illusion_Difference | Error
## -----------------------------------------------
## 0.00              |               17.89 | 0.50%
## 0.00              |                8.42 | 2.50%
## 0.00              |                0.00 | 9.40%

Discussion

This study allowed us to form a more precise prior expectation regarding the magnitude of the effects at stake and the type of interaction between them. Crucially, it allowed us to refine the range of task difficulty and illusion strength for the stimuli of the next study, so as to maximize the information gain.